
function [Alpha,bias,nsv,exitflag,flps,margin,trn_err]=...
    svm2mot(X,I,ker,arg,C,epsilon)
% SVM2MOT learns a binary SVM (L2 soft margin) using the Matlab Optimization Toolbox.
% [Alpha,bias,nsv,eflag,flps,margin,trn_err]=svm2mot(X,I,ker,arg,C,epsilon)
%
% SVM2MOT solves the binary Support Vector Machines problem with
%  an L2 soft margin (classification violations are quadratically
%  penalized) using the Matlab Optimization Toolbox.
%
%  For classification use SVMCLASS.
%
% Mandatory inputs:
%  X [NxL] L training patterns in N-dimensional space.
%  I [1xL] labels of training patterns (1 - 1st class, 2 - 2nd class).
%  ker [string] identifier of kernel, see help kernel.
%  arg [...] arguments of given kernel, see help kernel.
%  C [real] trade-off between margin and training error.
%
% Optional inputs:
%  epsilon [real] numerical precision for SVs, if Alpha(i) > epsilon
%      then i-th pattern is Support Vector. Default is 1e-9.
% 
% Outputs:
%  Alpha [1xL] Lagrange multipliers for training patterns.
%  bias [real] bias of decision function.
%  nsv [uint] number of Support Vectors, i.e. the number of patterns
%     with non-zero (> epsilon) Lagrange multipliers.
%  eflag [int] exit flag of qp function, see 'help qp'.
%  flps [uint] number of used floating point operations.
%  margin [real] margin between classes.
%  trn_err [real] is the training error (empirical risk).
%
% See also SVMCLASS, SVM.
%
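% Example (a usage sketch only; the 'rbf' identifier and its argument
%  are assumed to follow the conventions of this toolbox's KERNEL):
%   [Alpha,bias,nsv] = svm2mot( X, I, 'rbf', 1, 10 );
%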


% Statistical Pattern Recognition Toolbox, Vojtech Franc, Vaclav Hlavac
% (c) Czech Technical University Prague, http://cmp.felk.cvut.cz
% Written by Vojtech Franc (diploma thesis) 23.12.1999, 5.4.2000
% Modifications
% 23-Oct-2001, V.Franc
% 19-September-2001, V.Franc, name changed to svm2mot.
% 8-July-2001, V.Franc, comments changed, bias mistake removed.
% 28-April-2001, V.Franc, flps counter added
% 10-April-2001, V. Franc, created

% small diagonal to make kernel matrix positive definite
ADD_DIAG = 1e-9;

flops(0);   % reset the floating point operation counter

if nargin < 5,
  error('Not enough input arguments.');
end

if nargin < 6 | isempty(epsilon),
  epsilon = 1e-9;
end

% get dimension and number of points
[N,L] = size(X);

% labels {1,2} -> {1,-1}
Y = itosgn( I );     

% compute kernel matrix weighted by the labels, K(i,j) = y(i)*y(j)*k(x_i,x_j)
K = kernel(X,X,ker,arg);
K = K.*(Y'*Y);
K = K + ADD_DIAG*eye(size(K));

% add 1/(2*C) to the diagonal; this turns the L2 soft-margin problem
% into an equivalent hard-margin (separable) one
K = K + eye(size(K))/(2*C);
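% (a standard derivation, not specific to this toolbox: with the slacks
%  penalized as C*sum(xi.^2), the Wolfe dual becomes
%     max  ones(1,L)*Alpha - 0.5*Alpha'*(K + eye(L)/(2*C))*Alpha
%  i.e. the hard-margin dual with 1/(2*C) added to the diagonal of the
%  label-weighted kernel matrix K)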

% transform the SVM problem to the Optimization toolbox format:
%
% SVM (Wolfe dual) problem:
%   max  ones(1,L)*Alpha - 0.5*Alpha'*K*Alpha
%  Alpha
%
%  subject to:
%   Alpha >= 0
%   Y*Alpha = 0
%
% Quadratic programming in the Optimization toolbox
%   min 0.5*x'*H*x + f'*x
%    x
%
%  subject to:
%    Aeq*x = beq,
%    LB <= x <= UB

Aeq = Y;
beq = 0;

f = -ones(L,1);       % minimize -sum(Alpha), i.e. maximize sum(Alpha)

LB = zeros(L,1);      % 0 <= Alpha
UB = inf*ones(L,1);   % no upper bound on Alpha (L2 soft margin)
x0 = zeros(L,1);      % starting point

% call optimization toolbox; the last argument tells QP that the first
% constraint is an equality constraint
[Alpha,fval,exitflag] = qp(K, f, Aeq, beq, LB, UB, x0, 1);
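% Note: QP ships with older releases of the Optimization Toolbox and was
% later superseded by QUADPROG. An equivalent call with the newer solver
% (a sketch only, not exercised by this toolbox) would be:
%
%   [Alpha,fval,exitflag] = quadprog(K, f, [], [], Aeq, beq, LB, UB, x0);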


% find the support vectors
sv_inx = find( Alpha > epsilon );
nsv = length(sv_inx);

% compute the average value of the bias from the patterns on the margin;
% for the L2 soft margin every Support Vector (Alpha(i) > epsilon) lies
% on the margin of the equivalent separable problem
sv_margin = find( Alpha > epsilon );
nsv_margin = length( sv_margin );
 
if nsv_margin ~= 0,
   bias = sum(Y(sv_margin)'-K(sv_margin,sv_inx)*Alpha(sv_inx)...
          .*Y(sv_margin)')/nsv_margin;
else
   bias = 0;
   disp('Bias cannot be determined - no pattern on the margin.');
end
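% (the formula follows from the KKT conditions: a margin pattern satisfies
%  y_i*( sum_j Alpha(j)*y_j*k(x_i,x_j) + bias ) = 1 - Alpha(i)/(2*C), and
%  the 1/(2*C) diagonal shift kept in K accounts for the slack term)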

Alpha = Alpha(:)';   % return Alpha as a row vector
  
% compute margin between classes = 1/norm(w)
if nargout >= 6,
   K = K - eye(size(K))/(2*C);   % remove the soft-margin diagonal shift
   margin = 1/sqrt(Alpha*K*Alpha');
end
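% (w = sum_i Alpha(i)*y_i*phi(x_i), so norm(w)^2 = Alpha*K*Alpha' with the
%  label-weighted kernel matrix, and the geometric margin equals 1/norm(w))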

% compute training classification error (empirical risk)
if nargout >= 7,
   K = K.*(Y'*Y);   % undo the label weighting to recover plain k(x_i,x_j)
   fpred = K*(Alpha(:).*Y(:)) + ones(length(Y),1)*bias;

   trn_err = length( find( (fpred(:).*Y(:)) < 0 ))/length(Y);
end

flps = flops;   % number of floating point operations used

return;